const runtime.bucketCnt
107 uses · package runtime (current package)
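bucketCnt is the capacity of a single hash bucket: map.go#L67 below defines it as 1 << bucketCntBits, i.e. 8 key/elem slots. Most of the uses that follow come from the bucket memory layout, which packs all eight keys together and then all eight elems (avoiding padding between alternating keys and elems), so element i sits at dataOffset + bucketCnt*keysize + i*elemsize from the bucket's start. Here is a minimal sketch of that arithmetic under illustrative assumptions: a concrete bucket type with int32 keys and int64 elems. The real bmap declares only tophash, with keys, elems, and the overflow pointer laid out by the compiler after it.

    package main

    import (
        "fmt"
        "unsafe"
    )

    const (
        bucketCntBits = 3
        bucketCnt     = 1 << bucketCntBits // 8 slots per bucket
    )

    // bucket is an illustrative stand-in for the compiler-generated
    // layout: tophash, then all keys packed together, then all elems.
    type bucket struct {
        tophash [bucketCnt]uint8
        keys    [bucketCnt]int32
        elems   [bucketCnt]int64
    }

    func main() {
        var b bucket
        dataOffset := unsafe.Offsetof(b.keys) // start of the key block
        keysize := unsafe.Sizeof(b.keys[0])
        elemsize := unsafe.Sizeof(b.elems[0])

        // Element i lives past all bucketCnt keys; this mirrors the
        // add(b, dataOffset+bucketCnt*keysize+i*elemsize) pattern that
        // recurs throughout the call sites below.
        i := uintptr(3)
        e := unsafe.Pointer(uintptr(unsafe.Pointer(&b)) + dataOffset + bucketCnt*keysize + i*elemsize)
        fmt.Println(e == unsafe.Pointer(&b.elems[i])) // true
    }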
map.go#L67: bucketCnt = 1 << bucketCntBits
map.go#L154: tophash [bucketCnt]uint8
map.go#L433: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L445: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L494: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L506: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L538: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L550: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L620: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L625: elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L643: elem = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L667: elem = add(insertk, bucketCnt*uintptr(t.keysize))
map.go#L733: for i := uintptr(0); i < bucketCnt; i++ {
map.go#L754: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.elemsize))
map.go#L767: if i == bucketCnt-1 {
map.go#L786: i = bucketCnt - 1
map.go#L850: it.offset = uint8(r >> h.B & (bucketCnt - 1))
map.go#L911: for ; i < bucketCnt; i++ {
map.go#L912: offi := (i + it.offset) & (bucketCnt - 1)
map.go#L922: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+uintptr(offi)*uintptr(t.elemsize))
map.go#L1084: return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
map.go#L1162: x.e = add(x.k, bucketCnt*uintptr(t.keysize))
map.go#L1170: y.e = add(y.k, bucketCnt*uintptr(t.keysize))
map.go#L1175: e := add(k, bucketCnt*uintptr(t.keysize))
map.go#L1176: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, uintptr(t.keysize)), add(e, uintptr(t.elemsize)) {
map.go#L1222: if dst.i == bucketCnt {
map.go#L1226: dst.e = add(dst.k, bucketCnt*uintptr(t.keysize))
map.go#L1228: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
map.go#L1304: if t.key.align > bucketCnt {
map.go#L1307: if t.elem.align > bucketCnt {
map.go#L1316: if bucketCnt < 8 {
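The check at map.go#L1084 is the map's growth trigger: grow once the table would average more than 6.5 entries per bucket. A hedged, self-contained reconstruction, assuming the loadFactorNum = 13 and loadFactorDen = 2 constants that map.go defines alongside bucketCnt, and simplifying bucketShift to a plain shift (the runtime also masks the shift amount):

    package main

    import "fmt"

    const (
        bucketCnt     = 8
        loadFactorNum = 13 // assumed from map.go: load factor 13/2 = 6.5
        loadFactorDen = 2
    )

    // bucketShift returns 1<<b, the bucket count for a map with B = b.
    func bucketShift(b uint8) uintptr { return uintptr(1) << b }

    // overLoadFactor mirrors map.go#L1084: true once count exceeds
    // 6.5 entries per bucket, at which point the map grows.
    func overLoadFactor(count int, B uint8) bool {
        return count > bucketCnt && uintptr(count) > loadFactorNum*(bucketShift(B)/loadFactorDen)
    }

    func main() {
        // With B = 3 there are 8 buckets; the threshold is 13*(8/2) = 52.
        fmt.Println(overLoadFactor(52, 3)) // false: exactly at the limit
        fmt.Println(overLoadFactor(53, 3)) // true: time to grow
    }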
map_fast32.go#L44: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L46: return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
map_fast32.go#L84: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L86: return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize)), true
map_fast32.go#L126: for i := uintptr(0); i < bucketCnt; i++ {
map_fast32.go#L166: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast32.go#L175: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
map_fast32.go#L216: for i := uintptr(0); i < bucketCnt; i++ {
map_fast32.go#L256: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast32.go#L265: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.elemsize))
map_fast32.go#L298: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 4) {
map_fast32.go#L310: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.elemsize))
map_fast32.go#L319: if i == bucketCnt-1 {
map_fast32.go#L338: i = bucketCnt - 1
map_fast32.go#L386: x.e = add(x.k, bucketCnt*4)
map_fast32.go#L394: y.e = add(y.k, bucketCnt*4)
map_fast32.go#L399: e := add(k, bucketCnt*4)
map_fast32.go#L400: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 4), add(e, uintptr(t.elemsize)) {
map_fast32.go#L422: if dst.i == bucketCnt {
map_fast32.go#L426: dst.e = add(dst.k, bucketCnt*4)
map_fast32.go#L428: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
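Several sites above (map.go#L1228, map_fast32.go#L166, #L256, #L428) index tophash with i&(bucketCnt-1) rather than i directly. Because bucketCnt is a power of two, the mask is equivalent to i % bucketCnt and provably keeps the index inside the 8-entry array, letting the compiler drop the bounds check; map.go#L850 uses the same mask to clamp the random starting slot for iteration. A tiny demonstration:

    package main

    import "fmt"

    const bucketCnt = 8

    func main() {
        for i := uintptr(0); i < 2*bucketCnt; i++ {
            // For a power-of-two bucketCnt, mask and mod always agree,
            // and the mask form is visibly in [0, bucketCnt).
            fmt.Println(i, i&(bucketCnt-1), i%bucketCnt)
        }
    }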
map_fast64.go#L44: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L46: return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
map_fast64.go#L84: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L86: return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize)), true
map_fast64.go#L126: for i := uintptr(0); i < bucketCnt; i++ {
map_fast64.go#L166: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast64.go#L175: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
map_fast64.go#L216: for i := uintptr(0); i < bucketCnt; i++ {
map_fast64.go#L256: insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
map_fast64.go#L265: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.elemsize))
map_fast64.go#L298: for i, k := uintptr(0), b.keys(); i < bucketCnt; i, k = i+1, add(k, 8) {
map_fast64.go#L312: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.elemsize))
map_fast64.go#L321: if i == bucketCnt-1 {
map_fast64.go#L340: i = bucketCnt - 1
map_fast64.go#L388: x.e = add(x.k, bucketCnt*8)
map_fast64.go#L396: y.e = add(y.k, bucketCnt*8)
map_fast64.go#L401: e := add(k, bucketCnt*8)
map_fast64.go#L402: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 8), add(e, uintptr(t.elemsize)) {
map_fast64.go#L424: if dst.i == bucketCnt {
map_fast64.go#L428: dst.e = add(dst.k, bucketCnt*8)
map_fast64.go#L430: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
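map_fast32.go and map_fast64.go are specializations of the generic map.go routines for fixed 4- and 8-byte keys, so the generic bucketCnt*uintptr(t.keysize) folds to the constants bucketCnt*4 and bucketCnt*8 seen above. map_faststr.go (below) uses bucketCnt*2*goarch.PtrSize because each string key is stored as a two-word header (data pointer plus length). A quick check of the resulting key-block sizes:

    package main

    import (
        "fmt"
        "unsafe"
    )

    const bucketCnt = 8

    func main() {
        ptrSize := unsafe.Sizeof(uintptr(0)) // stands in for goarch.PtrSize
        fmt.Println("fast32 key block: ", bucketCnt*4)         // 32 bytes
        fmt.Println("fast64 key block: ", bucketCnt*8)         // 64 bytes
        fmt.Println("faststr key block:", bucketCnt*2*ptrSize) // 128 bytes on 64-bit
    }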
map_faststr.go#L30: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L39: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L45: keymaybe := uintptr(bucketCnt)
map_faststr.go#L46: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L55: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L65: if keymaybe != bucketCnt {
map_faststr.go#L71: if keymaybe != bucketCnt {
map_faststr.go#L74: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize))
map_faststr.go#L95: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L101: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L125: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L134: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L140: keymaybe := uintptr(bucketCnt)
map_faststr.go#L141: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L150: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L160: if keymaybe != bucketCnt {
map_faststr.go#L166: if keymaybe != bucketCnt {
map_faststr.go#L169: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+keymaybe*uintptr(t.elemsize)), true
map_faststr.go#L190: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L196: return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize)), true
map_faststr.go#L238: for i := uintptr(0); i < bucketCnt; i++ {
map_faststr.go#L285: insertb.tophash[inserti&(bucketCnt-1)] = top // mask inserti to avoid bounds checks
map_faststr.go#L293: elem := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*2*goarch.PtrSize+inserti*uintptr(t.elemsize))
map_faststr.go#L328: for i, kptr := uintptr(0), b.keys(); i < bucketCnt; i, kptr = i+1, add(kptr, 2*goarch.PtrSize) {
map_faststr.go#L338: e := add(unsafe.Pointer(b), dataOffset+bucketCnt*2*goarch.PtrSize+i*uintptr(t.elemsize))
map_faststr.go#L347: if i == bucketCnt-1 {
map_faststr.go#L366: i = bucketCnt - 1
map_faststr.go#L414: x.e = add(x.k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L422: y.e = add(y.k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L427: e := add(k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L428: for i := 0; i < bucketCnt; i, k, e = i+1, add(k, 2*goarch.PtrSize), add(e, uintptr(t.elemsize)) {
map_faststr.go#L450: if dst.i == bucketCnt {
map_faststr.go#L454: dst.e = add(dst.k, bucketCnt*2*goarch.PtrSize)
map_faststr.go#L456: dst.b.tophash[dst.i&(bucketCnt-1)] = top // mask dst.i as an optimization, to avoid a bounds check
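One idiom here is worth calling out: at map_faststr.go#L45 and #L140, keymaybe is initialized to bucketCnt. Valid slot indexes run 0..bucketCnt-1, so bucketCnt itself serves as an out-of-range sentinel, and the later keymaybe != bucketCnt tests mean "exactly one candidate slot was remembered". A simplified sketch of the pattern, with a hypothetical findCandidate helper that only compares key lengths (the real lookup also does partial byte comparisons before committing to a full key compare):

    package main

    import "fmt"

    const bucketCnt = 8

    // findCandidate is a hypothetical distillation of the faststr trick:
    // remember at most one slot whose key length matches the lookup key,
    // using bucketCnt as the "no candidate yet" sentinel.
    func findCandidate(keyLens [bucketCnt]int, want int) (uintptr, bool) {
        keymaybe := uintptr(bucketCnt) // sentinel: out of range for a slot
        for i := uintptr(0); i < bucketCnt; i++ {
            if keyLens[i] != want {
                continue
            }
            if keymaybe != bucketCnt {
                return 0, false // second candidate: give up, compare all keys
            }
            keymaybe = i
        }
        if keymaybe != bucketCnt {
            return keymaybe, true // exactly one slot worth a full compare
        }
        return 0, false
    }

    func main() {
        lens := [bucketCnt]int{3, 5, 7, 5, 0, 0, 0, 0}
        fmt.Println(findCandidate(lens, 7)) // 2 true: slot 2 is the lone candidate
    }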